} __cacheline_aligned;
static struct percpu_ctxt percpu_ctxt[NR_CPUS];
+static void paravirt_ctxt_switch_from(struct vcpu *v);
+static void paravirt_ctxt_switch_to(struct vcpu *v);
+
static void continue_idle_domain(struct vcpu *v)
{
reset_stack_and_jump(idle_loop);
v->arch.schedule_tail = continue_nonidle_domain;
}
+ v->arch.ctxt_switch_from = paravirt_ctxt_switch_from;
+ v->arch.ctxt_switch_to = paravirt_ctxt_switch_to;
+
v->arch.perdomain_ptes =
d->arch.mm_perdomain_pt + (vcpu_id << GDT_LDT_VCPU_SHIFT);
percpu_ctxt[smp_processor_id()].dirty_segment_mask = dirty_segment_mask;
}
-#define switch_kernel_stack(_n,_c) ((void)0)
+#define switch_kernel_stack(v) ((void)0)
#elif defined(__i386__)
#define load_segments(n) ((void)0)
#define save_segments(p) ((void)0)
-static inline void switch_kernel_stack(struct vcpu *n, unsigned int cpu)
+static inline void switch_kernel_stack(struct vcpu *v) /* cpu arg dropped; derived below */
{
- struct tss_struct *tss = &init_tss[cpu];
- tss->esp1 = n->arch.guest_context.kernel_sp;
- tss->ss1 = n->arch.guest_context.kernel_ss;
+ struct tss_struct *tss = &init_tss[smp_processor_id()]; /* current cpu's TSS */
+ tss->esp1 = v->arch.guest_context.kernel_sp; /* ring-1 stack the guest kernel runs on */
+ tss->ss1 = v->arch.guest_context.kernel_ss;
}
-#endif
+#endif /* __i386__ */
+
+static void paravirt_ctxt_switch_from(struct vcpu *v) /* PV vcpu being descheduled */
+{
+ save_segments(v); /* no-op on i386 per the macro above */
+}
+
+static void paravirt_ctxt_switch_to(struct vcpu *v) /* PV vcpu being scheduled in */
+{
+ set_int80_direct_trap(v); /* moved here from the old inline context-switch path */
+ switch_kernel_stack(v); /* i386: point TSS ring-1 stack at guest kernel stack */
+}
#define loaddebug(_v,_reg) \
__asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
stack_regs,
CTXT_SWITCH_STACK_BYTES);
unlazy_fpu(p);
- if ( !hvm_guest(p) )
- {
- save_segments(p);
- }
- else
- {
- hvm_save_segments(p);
- hvm_load_msrs();
- }
+ p->arch.ctxt_switch_from(p);
}
if ( !is_idle_vcpu(n) )
loaddebug(&n->arch.guest_context, 7);
}
- if ( !hvm_guest(n) )
- {
- set_int80_direct_trap(n);
- switch_kernel_stack(n, cpu);
- }
- else
- {
- hvm_restore_msrs(n);
- }
+ n->arch.ctxt_switch_to(n);
}
if ( p->domain != n->domain )
return 1;
}
-void svm_store_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
+static void svm_store_cpu_guest_regs(
+ struct vcpu *v, struct cpu_user_regs *regs)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
#endif
}
-void svm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
+static void svm_load_cpu_guest_regs(
+ struct vcpu *v, struct cpu_user_regs *regs)
{
svm_load_cpu_user_regs(v, regs);
}
-#ifdef __x86_64__
-
-void svm_save_segments(struct vcpu *v)
-{
-}
-void svm_load_msrs(void)
-{
-}
-void svm_restore_msrs(struct vcpu *v)
-{
-}
-#endif
-
#define IS_CANO_ADDRESS(add) 1
static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
hvm_funcs.store_cpu_guest_regs = svm_store_cpu_guest_regs;
hvm_funcs.load_cpu_guest_regs = svm_load_cpu_guest_regs;
-#ifdef __x86_64__
- hvm_funcs.save_segments = svm_save_segments;
- hvm_funcs.load_msrs = svm_load_msrs;
- hvm_funcs.restore_msrs = svm_restore_msrs;
-#endif
-
hvm_funcs.store_cpu_guest_ctrl_regs = svm_store_cpu_guest_ctrl_regs;
hvm_funcs.modify_guest_state = svm_modify_guest_state;
reset_stack_and_jump(svm_asm_do_launch);
}
+static void svm_ctxt_switch_from(struct vcpu *v) /* SVM: nothing to save on deschedule yet */
+{
+}
+
+static void svm_ctxt_switch_to(struct vcpu *v) /* SVM: nothing to restore on reschedule yet */
+{
+}
+
void svm_final_setup_guest(struct vcpu *v)
{
- v->arch.schedule_tail = arch_svm_do_launch;
+ v->arch.schedule_tail = arch_svm_do_launch;
+ v->arch.ctxt_switch_from = svm_ctxt_switch_from;
+ v->arch.ctxt_switch_to = svm_ctxt_switch_to;
if (v == v->domain->vcpu[0])
{
static unsigned long trace_values[NR_CPUS][4];
#define TRACE_VMEXIT(index,value) trace_values[smp_processor_id()][index]=value
+static void vmx_ctxt_switch_from(struct vcpu *v);
+static void vmx_ctxt_switch_to(struct vcpu *v);
+
void vmx_final_setup_guest(struct vcpu *v)
{
- v->arch.schedule_tail = arch_vmx_do_launch;
+ v->arch.schedule_tail = arch_vmx_do_launch;
+ v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
+ v->arch.ctxt_switch_to = vmx_ctxt_switch_to;
if ( v->vcpu_id == 0 )
{
}
#ifdef __x86_64__
+
static struct vmx_msr_state percpu_msr[NR_CPUS];
static u32 msr_data_index[VMX_MSR_COUNT] =
MSR_SYSCALL_MASK, MSR_EFER,
};
-void vmx_save_segments(struct vcpu *v)
+static void vmx_save_segments(struct vcpu *v)
{
rdmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.msr_content.shadow_gs);
}
* are not modified once set for generic domains, we don't save them,
* but simply reset them to the values set at percpu_traps_init().
*/
-void vmx_load_msrs(void)
+static void vmx_load_msrs(void)
{
struct vmx_msr_state *host_state = &percpu_msr[smp_processor_id()];
int i;
return 1;
}
-void
-vmx_restore_msrs(struct vcpu *v)
+static void vmx_restore_msrs(struct vcpu *v)
{
int i = 0;
struct vmx_msr_state *guest_state;
HVM_DBG_LOG(DBG_LEVEL_2,
"restore guest's index %d msr %lx with %lx\n",
- i, (unsigned long) msr_data_index[i], (unsigned long) guest_state->msr_items[i]);
+ i, (unsigned long)msr_data_index[i],
+ (unsigned long)guest_state->msr_items[i]);
set_bit(i, &host_state->flags);
wrmsrl(msr_data_index[i], guest_state->msr_items[i]);
clear_bit(i, &guest_flags);
}
}
#else /* __i386__ */
-#define vmx_save_init_msrs() ((void)0)
-static inline int long_mode_do_msr_read(struct cpu_user_regs *regs){
+#define vmx_save_segments(v) ((void)0) /* i386: shadow-GS MSR exists only on x86_64 */
+#define vmx_load_msrs() ((void)0) /* i386: no host syscall-MSR switching needed */
+#define vmx_restore_msrs(v) ((void)0)
+#define vmx_save_init_msrs() ((void)0)
+
+static inline int long_mode_do_msr_read(struct cpu_user_regs *regs) /* no long mode on i386 */
+{
return 0;
}
-static inline int long_mode_do_msr_write(struct cpu_user_regs *regs){
+
+static inline int long_mode_do_msr_write(struct cpu_user_regs *regs) /* no long mode on i386 */
+{
return 0;
}
-#endif
+
+#endif /* __i386__ */
+
+static void vmx_ctxt_switch_from(struct vcpu *v) /* VMX vcpu being descheduled */
+{
+ vmx_save_segments(v); /* x86_64: saves guest shadow-GS MSR; no-op on i386 */
+ vmx_load_msrs(); /* x86_64: reload host syscall MSRs (see percpu_msr); no-op on i386 */
+}
+
+static void vmx_ctxt_switch_to(struct vcpu *v) /* VMX vcpu being scheduled in */
+{
+ vmx_restore_msrs(v); /* x86_64: load guest MSRs that differ from host; no-op on i386 */
+}
void stop_vmx(void)
{
hvm_funcs.store_cpu_guest_regs = vmx_store_cpu_guest_regs;
hvm_funcs.load_cpu_guest_regs = vmx_load_cpu_guest_regs;
-#ifdef __x86_64__
- hvm_funcs.save_segments = vmx_save_segments;
- hvm_funcs.load_msrs = vmx_load_msrs;
- hvm_funcs.restore_msrs = vmx_restore_msrs;
-#endif
-
hvm_funcs.store_cpu_guest_ctrl_regs = vmx_store_cpu_guest_ctrl_regs;
hvm_funcs.modify_guest_state = vmx_modify_guest_state;
void (*schedule_tail) (struct vcpu *);
+ void (*ctxt_switch_from) (struct vcpu *); /* save arch state when vcpu is descheduled */
+ void (*ctxt_switch_to) (struct vcpu *); /* restore arch state when vcpu is scheduled in */
+
/* Bounce information for propagating an exception to guest OS. */
struct trap_bounce trap_bounce;
/*
* Store and load guest state:
* 1) load/store guest register state,
- * 2) load/store segment state (x86_64 only),
- * 3) load/store msr register state (x86_64 only),
- * 4) store guest control register state (used for panic dumps),
- * 5) modify guest state (e.g., set debug flags).
+ * 2) store guest control register state (used for panic dumps),
+ * 3) modify guest state (e.g., set debug flags).
*/
void (*store_cpu_guest_regs)(struct vcpu *v, struct cpu_user_regs *r);
void (*load_cpu_guest_regs)(struct vcpu *v, struct cpu_user_regs *r);
-#ifdef __x86_64__
- void (*save_segments)(struct vcpu *v);
- void (*load_msrs)(void);
- void (*restore_msrs)(struct vcpu *v);
-#endif
void (*store_cpu_guest_ctrl_regs)(struct vcpu *v, unsigned long crs[8]);
void (*modify_guest_state)(struct vcpu *v);
hvm_funcs.load_cpu_guest_regs(v, r);
}
-#ifdef __x86_64__
-static inline void
-hvm_save_segments(struct vcpu *v)
-{
- if (hvm_funcs.save_segments)
- hvm_funcs.save_segments(v);
-}
-
-static inline void
-hvm_load_msrs(void)
-{
- if (hvm_funcs.load_msrs)
- hvm_funcs.load_msrs();
-}
-
-static inline void
-hvm_restore_msrs(struct vcpu *v)
-{
- if (hvm_funcs.restore_msrs)
- hvm_funcs.restore_msrs(v);
-}
-#else
-#define hvm_save_segments(v) ((void)0)
-#define hvm_load_msrs(v) ((void)0)
-#define hvm_restore_msrs(v) ((void)0)
-#endif /* __x86_64__ */
-
static inline void
hvm_store_cpu_guest_ctrl_regs(struct vcpu *v, unsigned long crs[8])
{